import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import copy
from IPython.display import display, HTML, Markdown
import scipy.signal
def grayscale_heatmap(img):
    """Wrap a 2D image in a plotly Heatmap trace rendered in grayscale,
    with the colorbar hidden and hover tooltips disabled."""
    black_to_white = [[0, 'rgb(0,0,0)'], [1, 'rgb(255,255,255)']]
    return go.Heatmap(
        z=img,
        colorscale=black_to_white,
        showscale=False,
        hoverinfo="skip",
    )
def convolution(img, kernel, boundary="fill"):
    """2D convolution of `img` with `kernel` via scipy.

    Uses scipy.signal.convolve2d in its default "full" mode, so the output
    is larger than the input: (H + kh - 1) x (W + kw - 1).

    Parameters
    ----------
    img : 2D array-like
        Input image.
    kernel : 2D array-like
        Convolution kernel (callers pass odd-sized kernels).
    boundary : str
        Passed through to convolve2d ("fill", "wrap" or "symm").

    Returns
    -------
    numpy.ndarray
        The full convolution result.
    """
    # NOTE: the hand-rolled wrap-around convolution loop that used to follow
    # this return statement was unreachable dead code (including a no-op
    # `int(ksize_y / 2)` expression) and has been removed.
    return scipy.signal.convolve2d(img, kernel, boundary=boundary)
def kernel_normalize(kernel):
    """Normalize a 2D kernel in place so its entries sum to 1; return it.

    The input list-of-lists is mutated and the same object is returned.
    """
    total = 0
    for row in kernel:
        for value in row:
            total += value
    for row in kernel:
        for index in range(len(row)):
            row[index] /= total
    return kernel
def kernel_proposed():
    """Proposed 3x3 kernel: the standard (2,4,2 / 4,8,4 / 2,4,2) shape with
    the center weight slightly reduced to 7.8, normalized to sum 1."""
    weights = [
        [2, 4, 2],
        [4, 7.8, 4],
        [2, 4, 2],
    ]
    return kernel_normalize(weights)
def kernel_proposed6():
    """Variant of the proposed kernel used in the 6-iteration comparison:
    center weight 7.92 (a smaller reduction than 7.8), normalized to sum 1."""
    weights = [
        [2, 4, 2],
        [4, 7.92, 4],
        [2, 4, 2],
    ]
    return kernel_normalize(weights)
def kernel_github():
    """3x3 blur kernel from the reference github implementation: the outer
    product of the 1D kernel (1/4, 1/2, 1/4) with itself."""
    base = [1 / 4, 1 / 2, 1 / 4]
    return [[a * b for b in base] for a in base]
def kernel_paper():
    """5x5 B3-spline kernel (F. Murtagh, Multiscale Transform Methods in Data
    Analysis): outer product of (1/16, 1/4, 3/8, 1/4, 1/16) with itself.
    All weights are dyadic rationals, so the products are exact in floats."""
    base = [1 / 16, 1 / 4, 3 / 8, 1 / 4, 1 / 16]
    return [[a * b for b in base] for a in base]
def kernel_paper_cropped():
    """Center 3x3 of the 5x5 B3-spline kernel, renormalized to sum 1:
    outer product of (1/4, 3/8, 1/4), then normalized."""
    base = [1 / 4, 3 / 8, 1 / 4]
    return kernel_normalize([[a * b for b in base] for a in base])
def h_convolution(img, scale, base_kernel, boundary="fill"):
    """Convolve `img` with `base_kernel` dilated by `scale` (a-trous style).

    The dilated kernel keeps the base taps on a grid with stride `scale`
    and fills every other position with zero, then delegates to
    `convolution`.
    """
    rows = scale * (len(base_kernel) - 1) + 1
    kernel = []
    for y in range(rows):
        source_row = base_kernel[y // scale]
        cols = scale * (len(source_row) - 1) + 1
        row = [0] * cols
        if y % scale == 0:
            for x in range(0, cols, scale):
                row[x] = source_row[x // scale]
        kernel.append(row)
    return convolution(img, kernel, boundary)
def texture_center_dot(size=1):
    """Return a size x size float image of zeros with a single 1 at the center.

    Parameters
    ----------
    size : int
        Side length; the dot lands at index (size // 2, size // 2).
        The default of 1 yields a 1x1 image whose only pixel is 1.

    Returns
    -------
    numpy.ndarray
        The impulse image.
    """
    img = np.zeros((size, size))
    # np.zeros is already all-zero; the original nested re-zeroing loop
    # was redundant and has been removed.
    img[size // 2][size // 2] = 1
    return img
# --- Demo: hierarchical (a-trous) convolution with the kernel growing small -> big.
# (The duplicate img/kernel_scale initialization that used to precede the
# Markdown header was dead code — both were reassigned below before any use —
# and has been removed.)
display(Markdown("## From small to big kernel"))
fig = make_subplots(
    rows=1, cols=4,
    vertical_spacing=0.01,
    horizontal_spacing=0.02,
)
fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = 1200, height = 300,
    autosize = False,
)
img = texture_center_dot()
kernel_scale = 1
for k in range(4):
    # Each pass doubles the kernel dilation; each subplot shows one stage.
    img = h_convolution(img, kernel_scale, kernel_github())
    fig.add_trace(
        grayscale_heatmap(img),
        row=1, col=k+1,
    )
    kernel_scale = int(kernel_scale * 2)
fig.update_xaxes(showticklabels = False, constrain="domain")
fig.update_yaxes(showticklabels = False, constrain="domain")
# Lock each subplot to a square aspect ratio.
fig.layout.xaxis1.scaleanchor = "y1"
fig.layout.xaxis2.scaleanchor = "y2"
fig.layout.xaxis3.scaleanchor = "y3"
fig.layout.xaxis4.scaleanchor = "y4"
fig.show()
# --- Demo: the same pyramid, but iterating the kernel from big to small.
display(Markdown("## From big to small kernel"))
fig = make_subplots(
    rows=1, cols=4,
    vertical_spacing=0.01,
    horizontal_spacing=0.02,
)
fig.update_layout(
    margin=dict(t=8, r=8, b=8, l=8),
    width=1200, height=300,
    autosize=False,
)
img = texture_center_dot()
kernel_scale = 8
for column in range(1, 5):
    # Each pass halves the kernel dilation; one subplot per stage.
    img = h_convolution(img, kernel_scale, kernel_github())
    fig.add_trace(grayscale_heatmap(img), row=1, col=column)
    kernel_scale = int(kernel_scale * 0.5)
fig.update_xaxes(showticklabels=False, constrain="domain")
fig.update_yaxes(showticklabels=False, constrain="domain")
# Pin every subplot to a square aspect ratio.
for axis_index in range(1, 5):
    fig.layout[f"xaxis{axis_index}"].scaleanchor = f"y{axis_index}"
fig.show()
# Render the kernel comparison table (markdown with LaTeX fractions)
# above the comparison figure that follows.
display(Markdown(r"""
## Kernel comparison
|Kernel|Samples|Iterations|Total samples|Equivalent full kernel size|Relative efficiency|
|----|:----:|:----:|:----:|:----:|:----:|
|Proposed|9|4|36|17|8.00|
|Github - https://github.com/ZheyuanXie/CUDA-Path-Tracer-Denoising|9|4|36|17|8.00|
|F. Murtagh. Multiscale Transform Methods in Data Analysis<br>1D: ($\frac{1}{16}, \frac{1}{4}, \frac{3}{8}, \frac{1}{4}, \frac{1}{16}$)|25|3|75|13|2.25|
|F. Murtagh. Multiscale Transform Methods in Data Analysis - cropped to 3x3 and normalized<br>1D: ($\frac{1}{4}, \frac{3}{8}, \frac{1}{4}$) -> normalize|9|4|36|17|8.00|
<br>
"""))
# Side-by-side comparison of the four kernels after repeated a-trous passes.
# Mutates the global `img`/`kernel_scale` between subplots, so keep the
# sections in this order.
fig = make_subplots(
    rows=1, cols=4,
    vertical_spacing=0.01,
    horizontal_spacing=0.02,
    subplot_titles=("Proposed", "Github", "F. Murtagh", "F. Murtagh<br>(cropped & normalized)"),
)
fig.update_layout(
    margin = dict(t=48, r=8, b=8, l=8),
    width = 800,
    height = 250,
    autosize = False,
)
# Column 1: proposed kernel, 4 iterations.
img = texture_center_dot()
kernel_scale = 1
for k in range(4):
    img = h_convolution(img, kernel_scale, kernel_proposed())
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=1,
)
# Column 2: github kernel, 4 iterations.
img = texture_center_dot()
kernel_scale = 1
for k in range(4):
    img = h_convolution(img, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=2,
)
# Column 3: the full 5x5 paper kernel — only 3 iterations (larger support per pass).
img = texture_center_dot()
kernel_scale = 1
for k in range(3):
    img = h_convolution(img, kernel_scale, kernel_paper())
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=3,
)
# Column 4: cropped-and-normalized paper kernel, 4 iterations.
img = texture_center_dot()
kernel_scale = 1
for k in range(4):
    img = h_convolution(img, kernel_scale, kernel_paper_cropped())
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=4,
)
fig.update_xaxes(showticklabels = False, constrain="domain")
fig.update_yaxes(showticklabels = False, constrain="domain")
# Lock each subplot to a square aspect ratio.
fig.layout.xaxis1.scaleanchor = "y1"
fig.layout.xaxis2.scaleanchor = "y2"
fig.layout.xaxis3.scaleanchor = "y3"
fig.layout.xaxis4.scaleanchor = "y4"
fig.show()
| Kernel | Samples | Iterations | Total samples | Equivalent full kernel size | Relative efficiency |
|---|---|---|---|---|---|
| Proposed | 9 | 4 | 36 | 17 | 8.00 |
| Github - https://github.com/ZheyuanXie/CUDA-Path-Tracer-Denoising | 9 | 4 | 36 | 17 | 8.00 |
| F. Murtagh. Multiscale Transform Methods in Data Analysis 1D: ($\frac{1}{16}, \frac{1}{4}, \frac{3}{8}, \frac{1}{4}, \frac{1}{16}$) | 25 | 3 | 75 | 13 | 2.25 |
| F. Murtagh. Multiscale Transform Methods in Data Analysis - cropped to 3x3 and normalized 1D: ($\frac{1}{4}, \frac{3}{8}, \frac{1}{4}$) -> normalize | 9 | 4 | 36 | 17 | 8.00 |
display(Markdown(r"""
## Artifact comparison
"""))
fig = make_subplots(
    rows=2, cols=2,
    vertical_spacing=0.05,
    horizontal_spacing=0.02,
    # Fixed: the last title used a literal "\n", which plotly does not render
    # as a line break; it now matches the " - " style of the sibling titles.
    subplot_titles=("Github kernel - 6 iterations", "Proposed kernel - 6 iterations", "Github kernel - 4 iterations", "Proposed kernel - 4 iterations"),
)
fig.update_layout(
    margin = dict(t=48, r=8, b=8, l=8),
    width = 800, height = 800,
    autosize = False
)
# Top row: 6 iterations; bottom row: 4 iterations. Left: github, right: proposed.
img = texture_center_dot()
kernel_scale = 1
for k in range(6):
    img = h_convolution(img, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=1,
)
img = texture_center_dot()
kernel_scale = 1
for k in range(6):
    img = h_convolution(img, kernel_scale, kernel_proposed())
    kernel_scale = int(kernel_scale * 2)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=2,
)
img = texture_center_dot()
kernel_scale = 1
for k in range(4):
    img = h_convolution(img, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2)
fig.add_trace(
    grayscale_heatmap(img),
    row=2, col=1,
)
img = texture_center_dot()
kernel_scale = 1
for k in range(4):
    img = h_convolution(img, kernel_scale, kernel_proposed())
    kernel_scale = int(kernel_scale * 2)
fig.add_trace(
    grayscale_heatmap(img),
    row=2, col=2,
)
fig.update_xaxes(showticklabels = False, constrain="domain")
fig.update_yaxes(showticklabels = False, constrain="domain")
# Lock each subplot to a square aspect ratio.
fig.layout.xaxis1.scaleanchor = "y1"
fig.layout.xaxis2.scaleanchor = "y2"
fig.layout.xaxis3.scaleanchor = "y3"
fig.layout.xaxis4.scaleanchor = "y4"
fig.show()
# Commentary on the artifact comparison above (typo "iterstions" fixed).
display(Markdown(r"""
### Github kernel
https://github.com/ZheyuanXie/CUDA-Path-Tracer-Denoising<br>
<img src="https://raw.githubusercontent.com/ZheyuanXie/CUDA-Path-Tracer-Denoising/master/img/atrous_kernel.png" width="600">
**Proposed** kernel has slightly reduced center coefficient.<br>
It reduces the bright cross on 4 iterations without significant artifacts,<br>
but creates regular artifacts on 6 iterations.
It appears as the number of iterations increases, the kernel becomes more sensitive to alterations
"""))
# Difference between github and proposed kernels, at 6 and at 4 iterations.
fig = make_subplots(
    rows=2, cols=3,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
    subplot_titles=(
        "Github kernel <br> 6 iterations", "Proposed kernel <br> 6 iterations", "Difference",
        "Github kernel <br> 4 iterations", "Proposed kernel <br> 4 iterations", "Difference"
    ),
)
fig.update_layout(
    margin = dict(t=48, r=8, b=8, l=8),
    width = 900, height = 600,
    autosize = False
)
# Row 1: 6 iterations — github (`img`) vs the proposed6 variant (`img2`).
img = texture_center_dot()
kernel_scale = 1
for k in range(6):
    img = h_convolution(img, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=1,
)
img2 = texture_center_dot()
kernel_scale = 1
for k in range(6):
    img2 = h_convolution(img2, kernel_scale, kernel_proposed6())
    kernel_scale = int(kernel_scale * 2)
fig.add_trace(
    # Fixed: this subplot previously displayed `img` (the github result)
    # instead of `img2` (the proposed-kernel result it is titled after).
    grayscale_heatmap(img2),
    row=1, col=2,
)
img = np.absolute(np.subtract(img, img2))
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=3,
)
# Row 2: 4 iterations — github (`img`) vs proposed (`img2`).
img = texture_center_dot()
kernel_scale = 1
for k in range(4):
    img = h_convolution(img, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2)
fig.add_trace(
    grayscale_heatmap(img),
    row=2, col=1,
)
img2 = texture_center_dot()
kernel_scale = 1
for k in range(4):
    img2 = h_convolution(img2, kernel_scale, kernel_proposed())
    kernel_scale = int(kernel_scale * 2)
fig.add_trace(
    # Fixed: same img -> img2 mix-up as in row 1.
    grayscale_heatmap(img2),
    row=2, col=2,
)
img = np.absolute(np.subtract(img, img2))
fig.add_trace(
    grayscale_heatmap(img),
    row=2, col=3,
)
fig.update_xaxes(showticklabels = False, constrain="domain")
fig.update_yaxes(showticklabels = False, constrain="domain")
# Lock each subplot to a square aspect ratio.
fig.layout.xaxis1.scaleanchor = "y1"
fig.layout.xaxis2.scaleanchor = "y2"
fig.layout.xaxis3.scaleanchor = "y3"
fig.layout.xaxis4.scaleanchor = "y4"
fig.layout.xaxis5.scaleanchor = "y5"
fig.layout.xaxis6.scaleanchor = "y6"
fig.show()
# Conclusion for the 6-iteration difference experiment above.
display(Markdown(r"""
With 6 iterations reducing center weight can not significantly reduce the bright cross without creating artifacts.<br>
It appears kernel alterations are only feasible for small number of iterations (less than 6)
"""))
https://github.com/ZheyuanXie/CUDA-Path-Tracer-Denoising

Proposed kernel has slightly reduced center coefficient.
It reduces the bright cross on 4 iterations without significant artifacts,
but creates regular artifacts on 6 iterations.
It appears as the number of iterations increases, the kernel becomes more sensitive to alterations
With 6 iterations reducing center weight can not significantly reduce the bright cross without creating artifacts.
It appears kernel alterations are only feasible for small number of iterations (less than 6)
display(Markdown(r"""
## 1D convergence test
"""))
# Removed: a 5-column make_subplots figure that was created here was dead
# code — it was overwritten by a fresh make_subplots call (after the helper
# definitions below) before any trace was ever added to it.
def convolution_1D(arr, kernel):
    """Circular (wrap-around) 1D convolution of `arr` with `kernel`.

    Returns a new list the same length as `arr`; out-of-range samples wrap
    around via modular indexing.
    """
    length = len(arr)
    half = len(kernel) // 2
    output = []
    for position in range(length):
        accumulated = sum(
            arr[(position + tap_index - half) % length] * tap_weight
            for tap_index, tap_weight in enumerate(kernel)
        )
        output.append(accumulated)
    return output
def demo_kernel(base_kernel, color, iterations=5, name="", start=0, arr=None, big_to_small=False, total_iterations=0):
    """Iterate an a-trous 1D kernel over a signal, plotting each stage into
    the global `fig` (one subplot column per stage).

    Parameters
    ----------
    base_kernel : list[float]
        Dense 1D kernel; zeros are inserted between its taps as the scale grows.
    color : str
        Plotly line color for every plotted stage.
    iterations : int
        Number of convolution stages to run (and subplot columns to fill).
    name : str
        Legend label; shown only on the very first stage of a fresh run.
    start : int
        Column offset, for chaining several demo_kernel calls in one figure.
    arr : list[float] | None
        Signal to continue from; defaults to a 61-sample impulse at index 30.
    big_to_small : bool
        If True, halve the tap gap each stage instead of doubling it.
    total_iterations : int
        Overall stage count of a chained big-to-small run; sizes the initial gap.

    Returns
    -------
    list[float]
        The signal after the final convolution, for chaining into another call.
    """
    if arr is None:  # fixed: was `arr == None`
        arr = [0] * 61
        arr[30] = 1
    # `gap` is the a-trous dilation between kernel taps (a power of two).
    gap = 1
    if big_to_small:
        for _ in range(total_iterations - start - 2):
            gap *= 2
    else:
        for _ in range(start):
            gap *= 2
    for iteration in range(iterations):
        # Plot the signal *before* this stage's convolution.
        fig.add_trace(
            go.Scatter(
                x=list(range(len(arr))),
                y=arr,
                mode='lines',
                name=name,
                showlegend=(iteration == 0 and start == 0),
                line_color=color,
            ),
            row=1,
            col=iteration + 1 + start,
        )
        # Dilate the base kernel: insert (gap - 1) zeros between taps.
        kernel = []
        for base_kernel_index in range(len(base_kernel) - 1):
            kernel.append(base_kernel[base_kernel_index])
            for _ in range(gap - 1):
                kernel.append(0)
        kernel.append(base_kernel[-1])
        gap = int(gap * (0.5 if big_to_small else 2))
        arr = convolution_1D(arr, kernel)
    return arr
# 6-column figure shared (via the global `fig`) by the demo_kernel calls below.
fig = make_subplots(
    rows=1, cols=6,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
)
fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = 1200, height = 400,
    autosize = False,
    legend_traceorder="reversed",
)
# Legend / description table for the 1D kernels under test.
display(Markdown(r"""
|||
|----|----|
|F. Murtagh|F. Murtagh. Multiscale Transform Methods in Data Analysis<br>($\frac{1}{16}, \frac{1}{4}, \frac{3}{8}, \frac{1}{4}, \frac{1}{16}$)|
|F. Murtagh (cropped and normalized)|($\frac{1}{4}, \frac{3}{8}, \frac{1}{4}$) -> normalize -> (0.28, 0.42, 0.28)|
|Github|<br>https://github.com/ZheyuanXie/CUDA-Path-Tracer-Denoising<br>$(\frac{1}{4}, \frac{1}{2}, \frac{1}{4})$|
"""))
# Crop the 5-tap B3-spline kernel to its middle 3 taps and renormalize.
cropped = [1/4, 3/8, 1/4]
cropped_sum = sum(cropped)
for i in range(len(cropped)):
    cropped[i] /= cropped_sum
demo_kernel(cropped, color="goldenrod", name="F. Murtagh (cropped)")
demo_kernel([1/16, 1/4, 3/8, 1/4, 1/16], color="mediumpurple", name="F. Murtagh")
demo_kernel([1/4, 1/2, 1/4], color="green", name="Github")
# (Removed leftover commented-out matplotlib legend/show calls.)
fig.show()
# Conclusions for the test above, then a 6-iteration run of the proposed kernel.
display(Markdown(r"""
Cropped to 3 samples F. Murtagh kernel does not converge at all, even though it is quite close to github kernel.<br>
Github kernel converges perfectly to a triangle.<br><br>
"""))
fig = make_subplots(
    rows=1, cols=6,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
)
fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = 1200, height = 400,
    autosize = False,
    legend_traceorder="reversed",
)
demo_kernel([0.253,0.494,0.253], color="mediumpurple", iterations=6, name="Proposed")
fig.show()
# Commentary on the proposed kernel's 1D behavior ("large of small" typo fixed).
display(Markdown(r"""
Factorized into 1D, proposed kernel does not converge smoothly.<br>
But in 2D it gets rid of the bright cross at the cost of marginally less smooth distance fading.
It appears constructing a good 2D kernel is more complex than just multiplying row-vector and column-vector of a good 1D kernel.
**Edit:**
Proposed kernel will only converge in 2D with small number of iterations (less than 6),<br>
with 6+iterations it generates artifacts.
It appears 2D convergence and 1D convergence are connected, but on small number of iterations 2D convergence is possible without 1D convergence.
Also, the theory of the kernel becoming more sensitive to changes with increased iteration count is confirmed
**Idea**: use modified kernel on first iterations, then switch to non-modified. Is it better to start with large or small kernel?
"""))
# Mixed-kernel experiment 1: 3 proposed stages then 3 github stages, small -> big.
fig = make_subplots(
    rows=1, cols=6,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
)
fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = 1200, height = 400,
    autosize = False,
    legend_traceorder="reversed",
)
arr = demo_kernel([0.253,0.494,0.253], color="mediumpurple", iterations=3, name="3 x Proposed + 3 x Github<br>small to big")
demo_kernel([1/4,1/2,1/4], color="mediumpurple", iterations=3,start=3, arr=arr)
fig.show()
# Mixed-kernel experiment 2: same mix, iterating the kernel from big to small.
fig = make_subplots(
    rows=1, cols=6,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
)
fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = 1200, height = 400,
    autosize = False,
    legend_traceorder="reversed",
)
arr = demo_kernel([0.253,0.494,0.253], color="green", iterations=3, name="3 x Proposed + 3 x Github<br>big to small", big_to_small=True, total_iterations=6)
demo_kernel([1/4,1/2,1/4], color="green", iterations=3, start=3, arr=arr, big_to_small=True, total_iterations=6)
fig.show()
display(Markdown(r"""
Smooth convergence is achieved when going big to small kernel, using modified kernel on first 3 (big) iterations
**Question**: will it converge when going small to big and using modified kernel on **last** (big) iterations?
"""))
# Mixed-kernel experiment 3: reverse order — 3 github then 3 proposed, small -> big.
fig = make_subplots(
    rows=1, cols=6,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
)
fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = 1200, height = 400,
    autosize = False,
    legend_traceorder="reversed",
)
arr = demo_kernel([1/4,1/2,1/4], color="mediumpurple", iterations=3, name="3 x Github + 3 x Proposed <br>small to big")
demo_kernel([0.253,0.494,0.253], color="mediumpurple", iterations=3, start=3, arr=arr)
fig.show()
display(Markdown(r"""
It does! How about more custom iterations?
Testing 2 github + 4 custom:
"""))
# Mixed-kernel experiment 4: 2 github stages then 4 proposed stages.
fig = make_subplots(
    rows=1, cols=6,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
)
fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = 1200, height = 400,
    autosize = False,
    legend_traceorder="reversed",
)
arr = demo_kernel([1/4,1/2,1/4], color="green", iterations=2, name="2 x Github + 4 x Proposed<br>small to big")
demo_kernel([0.253,0.494,0.253], color="green", iterations=4, start=2, arr=arr)
fig.show()
display(Markdown(r"""
It does converge.
**Todo**: test in 2D
"""))
# Counterexample: a flat (box) 3-tap kernel for comparison.
fig = make_subplots(
    rows=1, cols=5,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
)
fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = 1200, height = 400,
    autosize = False,
    legend_traceorder="reversed",
)
demo_kernel([1/3,1/3,1/3], name="(1/3, 1/3, 1/3)", color="mediumpurple")
fig.show()
display(Markdown(r"""
<br>
Kernel $(\frac{1}{3}, \frac{1}{3}, \frac{1}{3})$ does not converge. **Need to figure out the convergence condition**.
**ToDo**: make a demo comparing 6 iterations of 1D kernel with different center values
"""))
| F. Murtagh | F. Murtagh. Multiscale Transform Methods in Data Analysis ($\frac{1}{16}, \frac{1}{4}, \frac{3}{8}, \frac{1}{4}, \frac{1}{16}$) |
| F. Murtagh (cropped and normalized) | ($\frac{1}{4}, \frac{3}{8}, \frac{1}{4}$) -> normalize -> (0.28, 0.42, 0.28) |
| Github | https://github.com/ZheyuanXie/CUDA-Path-Tracer-Denoising $(\frac{1}{4}, \frac{1}{2}, \frac{1}{4})$ |
Cropped to 3 samples F. Murtagh kernel does not converge at all, even though it is quite close to github kernel.
Github kernel converges perfectly to a triangle.
Factorized into 1D, proposed kernel does not converge smoothly.
But in 2D it gets rid of the bright cross at the cost of marginally less smooth distance fading.
It appears constructing a good 2D kernel is more complex than just multiplying row-vector and column-vector of a good 1D kernel.
Edit:
Proposed kernel will only converge in 2D with small number of iterations (less than 6),
with 6+iterations it generates artifacts.
It appears 2D convergence and 1D convergence are connected, but on small number of iterations 2D convergence is possible without 1D convergence.
Also, the theory of the kernel becoming more sensitive to changes with increased iteration count is confirmed
Idea: use modified kernel on first iterations, then switch to non-modified. Is it better to start with large or small kernel?
Smooth convergence is achieved when going big to small kernel, using modified kernel on first 3 (big) iterations
Question: will it converge when going small to big and using modified kernel on last (big) iterations?
It does! How about more custom iterations?
Testing 2 github + 4 custom:
It does converge.
Todo: test in 2D
Kernel $(\frac{1}{3}, \frac{1}{3}, \frac{1}{3})$ does not converge. Need to figure out the convergence condition.
ToDo: make a demo comparing 6 iterations of 1D kernel with different center values
# Recipe description for the center-coefficient sweep below.
display(Markdown(r"""
1. Kernel = (1, center_coef, 1)
2. Normalize kernel (divide by sum)
3. Five convolutions with increasing sparse kernel size (padding with zeroes)
"""))
def test_1D_kernel(center_coef, scale):
base_kernel = [1, center_coef, 1]
s = sum(base_kernel)
for i in range(len(base_kernel)):
base_kernel[i] /= s
kernel = []
for i in range(scale*2+1):
if i % scale == 0:
kernel.append(base_kernel[int(i / scale)])
else:
kernel.append(0)
return kernel
# Sweep the (pre-normalization) center coefficient from 1.0 to 3.9 in steps
# of 0.1 and plot the 5-stage big-to-small result for each value.
fig = plt.figure(figsize=(15, 20))
plt.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=0.8, wspace=0, hspace=0.5)
for step in range(-10, 20):
    center_coef = 2 + step * 0.1
    arr = [0] * 61
    arr[30] = 1
    kernel_scale = 16
    for _ in range(5):
        arr = convolution_1D(arr, test_1D_kernel(center_coef, kernel_scale))
        kernel_scale = int(kernel_scale * 0.5)
    ax = fig.add_subplot(8, 5, step + 11)
    ax.plot(arr)
    ax.set_title("center coef: " + str(round(center_coef, 1)))
    ax.axis('off')
    if center_coef == 2:
        # Highlight the reference (github) kernel's cell with a red overlay.
        bounds = ax.axis()
        highlight = mpl.patches.Rectangle(
            (bounds[0], bounds[2] - 0.002),
            bounds[1] - bounds[0],
            bounds[3] - bounds[2] + 0.01,
            fill=True, linewidth=0, alpha=0.15, color="red",
        )
        ax.add_patch(highlight).set_clip_on(False)
plt.show()
# Conclusion of the center-coefficient sweep ("convering" typo fixed).
display(Markdown(r"""
<br>
Only one (github) kernel converges: $(\frac{1}{4}, \frac{1}{2}, \frac{1}{4})$<br>
Moving away from this value steadily increases noise
<br><br>
**Question:** is there only one converging kernel for each given size, or larger sizes can have multiple different converging kernels?
"""))
Only one (github) kernel converges: $(\frac{1}{4}, \frac{1}{2}, \frac{1}{4})$
Moving away from this value steadily increases noise
Question: is there only one converging kernel for each given size, or larger sizes can have multiple different converging kernels?
# Header for the plotly feature test (evaluating a switch away from matplotlib).
display(Markdown(r"""
## Plotly
Testing plotly, planning to switch from matplotlib.<br>
Matplotlib api is inconsistent, unpredictable and unwieldy.
"""))
# 2x2 layout test: two heatmaps reusing whatever the global `img` currently
# holds (the last difference image from above), one scatter, plus rectangle
# shapes and per-subplot axis styling.
fig = make_subplots(
    rows=2, cols=2,
    column_widths=[0.5, 0.5],
    row_heights=[0.5, 0.5],
    vertical_spacing=0.01,
    horizontal_spacing=0.01,
)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=1,
)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=2,
)
fig.add_trace(
    go.Scatter(
        x=list(range(10)),
        y=list(range(10)),
    ),
    row=2, col=2,
)
fig.update_layout(
    margin = dict(t=0,r=0,b=0,l=0),
    width = 700, height = 700,
    autosize = False,
)
# Both rectangles are attached to the 4th subplot's axes (xref/yref "x4"/"y4").
fig.update_layout(
    shapes=[
        dict(type="rect", xref="x4", yref="y4",
            x0=0, y0=0, x1=10, y1=10,
            line = dict(
                width=0,
            ),
            fillcolor="black",
            layer="below",
        ),
        dict(type="rect", xref="x4", yref="y4",
            x0=6, y0=3, x1=9, y1=7,
            line = dict(
                width = 3,
                color = "red",
            )
        ),
    ]
)
fig.update_xaxes(showticklabels = False)
fig.update_yaxes(showticklabels = False)
# Re-enable ticks and add a mirrored border on the scatter subplot only.
fig.update_xaxes(
    showline=True, linewidth=2, linecolor='black', mirror=True, showticklabels=True,
    row=2, col=2
)
fig.update_yaxes(
    showline=True, linewidth=2, linecolor='black', mirror=True, showticklabels=True,
    row=2, col=2
)
fig.show()
Testing plotly, planning to switch from matplotlib.
Matplotlib api is inconsistent, unpredictable and unwieldy.